In [1]:
import tensorflow as tf
import numpy as np
from tensorflow import data
import shutil
import math
from datetime import datetime
from tensorflow.python.feature_column import feature_column

print(tf.__version__)


/Users/khalidsalama/anaconda/lib/python3.6/importlib/_bootstrap.py:205: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6
  return f(*args, **kwds)
1.4.0

Steps to use the TF Estimator (Train_And_Evaluate) APIs

  1. Define dataset metadata
  2. Define data input function to read the data from .tfrecord files + feature processing
  3. Create TF feature columns based on metadata + extended feature columns
  4. Define an estimator (LinearCombinedDNNRegressor) with the required feature columns (wide/deep) & parameters
  5. Run an experiment using the estimator train_and_evaluate function to train, evaluate, and export the model
  6. Evaluate the model using test data
  7. Perform predictions & serving the exported model

In [2]:
MODEL_NAME = 'reg-model-06'

TRAIN_DATA_FILES_PATTERN = 'data/train-*.tfrecords'
VALID_DATA_FILES_PATTERN = 'data/valid-*.tfrecords'
TEST_DATA_FILES_PATTERN = 'data/test-*.tfrecords'

RESUME_TRAINING = False
PROCESS_FEATURES = True
EXTEND_FEATURE_COLUMNS = True
MULTI_THREADING = True

1. Define Dataset Metadata

  • tf.example feature names and defaults
  • Numeric and categorical feature names
  • Target feature name
  • Unused features

In [3]:
HEADER = ['key','x','y','alpha','beta','target']
HEADER_DEFAULTS = [[0], [0.0], [0.0], ['NA'], ['NA'], [0.0]]

NUMERIC_FEATURE_NAMES = ['x', 'y']  

CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY = {'alpha':['ax01', 'ax02'], 'beta':['bx01', 'bx02']}
CATEGORICAL_FEATURE_NAMES = list(CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY.keys())

FEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES

TARGET_NAME = 'target'

UNUSED_FEATURE_NAMES = list(set(HEADER) - set(FEATURE_NAMES) - {TARGET_NAME})

print("Header: {}".format(HEADER))
print("Numeric Features: {}".format(NUMERIC_FEATURE_NAMES))
print("Categorical Features: {}".format(CATEGORICAL_FEATURE_NAMES))
print("Target: {}".format(TARGET_NAME))
print("Unused Features: {}".format(UNUSED_FEATURE_NAMES))


Header: ['key', 'x', 'y', 'alpha', 'beta', 'target']
Numeric Features: ['x', 'y']
Categorical Features: ['alpha', 'beta']
Target: target
Unused Features: ['key']

2. Define Data Input Function

  • Input .tfrecords files name pattern
  • Use TF Dataset APIs to read and process the data
  • Parse tf.exmaples to feature tensors
  • Apply feature processing
  • Return (features, target) tensors

a. Parsing and preprocessing logic


In [4]:
def parse_tf_example(example_proto):

    feature_spec = {}

    for feature_name in NUMERIC_FEATURE_NAMES:
        feature_spec[feature_name] = tf.FixedLenFeature(shape=(1), dtype=tf.float32)
    
    for feature_name in CATEGORICAL_FEATURE_NAMES:
        feature_spec[feature_name] = tf.FixedLenFeature(shape=(1), dtype=tf.string)
    
    feature_spec[TARGET_NAME] = tf.FixedLenFeature(shape=(1), dtype=tf.float32)

    parsed_features = tf.parse_example(serialized=example_proto, features=feature_spec)
    
    target = parsed_features.pop(TARGET_NAME)
    
    return parsed_features, target


def process_features(features):
    
    # example of clipping
    features['x'] = tf.clip_by_value(features['x'], clip_value_min=-3, clip_value_max=3)
    features['y'] = tf.clip_by_value(features['y'], clip_value_min=-3, clip_value_max=3)
    
    # example of polynomial expansion
    features["x_2"] = tf.square(features['x'])
    features["y_2"] = tf.square(features['y'])
    
    # example of nonlinearity
    features["xy"] = features['x'] * features['y']
    
    # example of custom logic
    features['dist_xy'] =  tf.sqrt(tf.squared_difference(features['x'],features['y']))
    features["sin_x"] = tf.sin(features['x'])
    features["cos_y"] = tf.sin(features['y'])
    
    
    
    return features

b. Data pipeline input function


In [5]:
def tfrecods_input_fn(files_name_pattern, mode=tf.estimator.ModeKeys.EVAL, 
                 num_epochs=None, 
                 batch_size=200):
    
    shuffle = True if mode == tf.estimator.ModeKeys.TRAIN else False
    
    print("")
    print("* data input_fn:")
    print("================")
    print("Input file(s): {}".format(files_name_pattern))
    print("Batch size: {}".format(batch_size))
    print("Epoch Count: {}".format(num_epochs))
    print("Mode: {}".format(mode))
    print("Shuffle: {}".format(shuffle))
    print("================")
    print("")

    file_names = tf.matching_files(files_name_pattern)
    dataset = data.TFRecordDataset(filenames=file_names)

    if shuffle:
        dataset = dataset.shuffle(buffer_size=2 * batch_size + 1)
    
    dataset = dataset.batch(batch_size)
    dataset = dataset.map(lambda tf_example: parse_tf_example(tf_example))
    
    if PROCESS_FEATURES:
        dataset = dataset.map(lambda features, target: (process_features(features), target))
        
    dataset = dataset.repeat(num_epochs)
    iterator = dataset.make_one_shot_iterator()
    
    features, target = iterator.get_next()
    return features, target

In [6]:
features, target = tfrecods_input_fn(files_name_pattern="")
print("Feature read from TFRecords: {}".format(list(features.keys())))
print("Target read from TFRecords: {}".format(target))


* data input_fn:
================
Input file(s): 
Batch size: 200
Epoch Count: None
Mode: eval
Shuffle: False
================

Feature read from TFRecords: ['alpha', 'beta', 'x', 'y', 'x_2', 'y_2', 'xy', 'dist_xy', 'sin_x', 'cos_y']
Target read from TFRecords: Tensor("IteratorGetNext:10", shape=(?, 1), dtype=float32)

3. Define Feature Columns

The input numeric columns are assumed to be normalized (or have the same scale). Otherwise, a normlizer_fn, along with the normlisation params (mean, stdv or min, max) should be passed to tf.feature_column.numeric_column() constructor


In [7]:
def extend_feature_columns(feature_columns, hparams):
    
    num_buckets = hparams.num_buckets
    embedding_size = hparams.embedding_size

    buckets = np.linspace(-3, 3, num_buckets).tolist()

    alpha_X_beta = tf.feature_column.crossed_column(
            [feature_columns['alpha'], feature_columns['beta']], 4)

    x_bucketized = tf.feature_column.bucketized_column(
            feature_columns['x'], boundaries=buckets)

    y_bucketized = tf.feature_column.bucketized_column(
            feature_columns['y'], boundaries=buckets)

    x_bucketized_X_y_bucketized = tf.feature_column.crossed_column(
           [x_bucketized, y_bucketized], num_buckets**2)

    x_bucketized_X_y_bucketized_embedded = tf.feature_column.embedding_column(
            x_bucketized_X_y_bucketized, dimension=embedding_size)


    feature_columns['alpha_X_beta'] = alpha_X_beta
    feature_columns['x_bucketized_X_y_bucketized'] = x_bucketized_X_y_bucketized
    feature_columns['x_bucketized_X_y_bucketized_embedded'] = x_bucketized_X_y_bucketized_embedded
    
    return feature_columns
    

def get_feature_columns(hparams):
    
    CONSTRUCTED_NUMERIC_FEATURES_NAMES = ['x_2', 'y_2', 'xy', 'dist_xy', 'sin_x', 'cos_y']
    all_numeric_feature_names = NUMERIC_FEATURE_NAMES.copy() 
    
    if PROCESS_FEATURES:
        all_numeric_feature_names += CONSTRUCTED_NUMERIC_FEATURES_NAMES

    numeric_columns = {feature_name: tf.feature_column.numeric_column(feature_name)
                       for feature_name in all_numeric_feature_names}

    categorical_column_with_vocabulary = \
        {item[0]: tf.feature_column.categorical_column_with_vocabulary_list(item[0], item[1])
         for item in CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY.items()}
        
    feature_columns = {}

    if numeric_columns is not None:
        feature_columns.update(numeric_columns)

    if categorical_column_with_vocabulary is not None:
        feature_columns.update(categorical_column_with_vocabulary)
    
    if EXTEND_FEATURE_COLUMNS:
        feature_columns = extend_feature_columns(feature_columns, hparams)
        
    return feature_columns

feature_columns = get_feature_columns(tf.contrib.training.HParams(num_buckets=5,embedding_size=3))
print("Feature Columns: {}".format(feature_columns))


Feature Columns: {'x': _NumericColumn(key='x', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), 'y': _NumericColumn(key='y', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), 'x_2': _NumericColumn(key='x_2', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), 'y_2': _NumericColumn(key='y_2', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), 'xy': _NumericColumn(key='xy', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), 'dist_xy': _NumericColumn(key='dist_xy', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), 'sin_x': _NumericColumn(key='sin_x', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), 'cos_y': _NumericColumn(key='cos_y', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), 'alpha': _VocabularyListCategoricalColumn(key='alpha', vocabulary_list=('ax01', 'ax02'), dtype=tf.string, default_value=-1, num_oov_buckets=0), 'beta': _VocabularyListCategoricalColumn(key='beta', vocabulary_list=('bx01', 'bx02'), dtype=tf.string, default_value=-1, num_oov_buckets=0), 'alpha_X_beta': _CrossedColumn(keys=(_VocabularyListCategoricalColumn(key='alpha', vocabulary_list=('ax01', 'ax02'), dtype=tf.string, default_value=-1, num_oov_buckets=0), _VocabularyListCategoricalColumn(key='beta', vocabulary_list=('bx01', 'bx02'), dtype=tf.string, default_value=-1, num_oov_buckets=0)), hash_bucket_size=4, hash_key=None), 'x_bucketized_X_y_bucketized': _CrossedColumn(keys=(_BucketizedColumn(source_column=_NumericColumn(key='x', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), boundaries=(-3.0, -1.5, 0.0, 1.5, 3.0)), _BucketizedColumn(source_column=_NumericColumn(key='y', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), boundaries=(-3.0, -1.5, 0.0, 1.5, 3.0))), hash_bucket_size=25, hash_key=None), 'x_bucketized_X_y_bucketized_embedded': _EmbeddingColumn(categorical_column=_CrossedColumn(keys=(_BucketizedColumn(source_column=_NumericColumn(key='x', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), boundaries=(-3.0, -1.5, 0.0, 1.5, 3.0)), _BucketizedColumn(source_column=_NumericColumn(key='y', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), boundaries=(-3.0, -1.5, 0.0, 1.5, 3.0))), hash_bucket_size=25, hash_key=None), dimension=3, combiner='mean', initializer=<tensorflow.python.ops.init_ops.TruncatedNormal object at 0x11b3740b8>, ckpt_to_load_from=None, tensor_name_in_ckpt=None, max_norm=None, trainable=True)}

4. Define an Estimator Creation Function

a. Get wide and deep feature columns

  • dense columns = numeric columns + embedding columns
  • categorical columns = vocabolary list columns + bucketized columns
  • sparse columns = hashed categorical columns + crossed columns
  • categorical columns => indicator columns
  • deep columns = dense columns + indicator columns
  • wide columns = categorical columns + sparse columns

In [8]:
def get_wide_deep_columns():
    
    feature_columns = list(get_feature_columns(hparams).values())
    
    dense_columns = list(
        filter(lambda column: isinstance(column, feature_column._NumericColumn) |
                              isinstance(column, feature_column._EmbeddingColumn),
               feature_columns
        )
    )

    categorical_columns = list(
        filter(lambda column: isinstance(column, feature_column._VocabularyListCategoricalColumn) |
                              isinstance(column, feature_column._BucketizedColumn),
                   feature_columns)
    )
    
    sparse_columns = list(
        filter(lambda column: isinstance(column,feature_column._HashedCategoricalColumn) |
                              isinstance(column, feature_column._CrossedColumn),
               feature_columns)
    )

    indicator_columns = list(
            map(lambda column: tf.feature_column.indicator_column(column),
                categorical_columns)
    )
    
    deep_feature_columns = dense_columns + indicator_columns
    wide_feature_columns = categorical_columns + sparse_columns
    
    return wide_feature_columns, deep_feature_columns

b. Define the DNNLinearCombinedRegressor


In [9]:
def create_estimator(run_config, hparams, print_desc=False):
    
    wide_feature_columns, deep_feature_columns = get_wide_deep_columns()
    
    estimator = tf.estimator.DNNLinearCombinedRegressor(
        
        dnn_feature_columns = deep_feature_columns,
        linear_feature_columns = wide_feature_columns,
        
        dnn_hidden_units= hparams.hidden_units,
        
        dnn_optimizer= tf.train.AdamOptimizer(),
        
        dnn_activation_fn= tf.nn.elu,
        dnn_dropout= hparams.dropout_prob,
        
        config= run_config
    )
    
    
    if print_desc:
        print("")
        print("*Estimator Type:")
        print("================")
        print(type(estimator))
        print("")
        print("*deep columns:")
        print("==============")
        print(deep_feature_columns)
        print("")
        print("wide columns:")
        print("=============")
        print(wide_feature_columns)
        print("")
    
    return estimator

5. Run Experiment

a. Set HParam and RunConfig


In [10]:
TRAIN_SIZE = 12000
NUM_EPOCHS = 1000
BATCH_SIZE = 500
EVAL_AFTER_SEC = 15
TOTAL_STEPS = (TRAIN_SIZE/BATCH_SIZE)*NUM_EPOCHS

hparams  = tf.contrib.training.HParams(
    num_epochs = NUM_EPOCHS,
    batch_size = BATCH_SIZE,
    hidden_units=[16, 12, 8],
    num_buckets = 6,
    embedding_size = 3,
    max_steps = TOTAL_STEPS,
    dropout_prob = 0.001)

model_dir = 'trained_models/{}'.format(MODEL_NAME)

run_config = tf.estimator.RunConfig(
    tf_random_seed=19830610,
    model_dir=model_dir
)

print(hparams)
print("Model Directory:", run_config.model_dir)
print("")
print("Dataset Size:", TRAIN_SIZE)
print("Batch Size:", BATCH_SIZE)
print("Steps per Epoch:",TRAIN_SIZE/BATCH_SIZE)
print("Total Steps:", TOTAL_STEPS)
print("That is 1 evaluation step after each",EVAL_AFTER_SEC," training seconds")


[('batch_size', 500), ('dropout_prob', 0.001), ('embedding_size', 3), ('hidden_units', [16, 12, 8]), ('max_steps', 24000.0), ('num_buckets', 6), ('num_epochs', 1000)]
Model Directory: trained_models/reg-model-06

Dataset Size: 12000
Batch Size: 500
Steps per Epoch: 24.0
Total Steps: 24000.0
That is 1 evaluation step after each 15  training seconds

b. Define Serving Function


In [11]:
def csv_serving_input_fn():
    
    SERVING_HEADER = ['x','y','alpha','beta']
    SERVING_HEADER_DEFAULTS = [[0.0], [0.0], ['NA'], ['NA']]

    rows_string_tensor = tf.placeholder(dtype=tf.string,
                                         shape=[None],
                                         name='csv_rows')
    
    receiver_tensor = {'csv_rows': rows_string_tensor}

    row_columns = tf.expand_dims(rows_string_tensor, -1)
    columns = tf.decode_csv(row_columns, record_defaults=SERVING_HEADER_DEFAULTS)
    features = dict(zip(SERVING_HEADER, columns))
    
    if PROCESS_FEATURES:
        features = process_features(features)

    return tf.estimator.export.ServingInputReceiver(
        features, receiver_tensor)

c. Define an Early Stopping Monitor (Hook)


In [12]:
class EarlyStoppingHook(tf.train.SessionRunHook):
    
    def __init__(self, early_stopping_rounds=1):
        self._best_loss = None
        self._early_stopping_rounds = early_stopping_rounds
        self._counter = 0
        
        print("")
        print("*** Early Stopping Hook: - Created")
        print("*** Early Stopping Hook:: Early Stopping Rounds: {}".format(self._early_stopping_rounds))
        print("")

    def before_run(self, run_context):
        
        graph = run_context.session.graph
        
#         tensor_name = "dnn/head/weighted_loss/Sum:0" #works!!
#         loss_tensor = graph.get_tensor_by_name(tensor_name)

        loss_tensor = graph.get_collection(tf.GraphKeys.LOSSES)[1]
        return tf.train.SessionRunArgs(loss_tensor)

    def after_run(self, run_context, run_values):
        
        last_loss = run_values.results
        
        print("")
        print("************************")
        print("** Evaluation Monitor - Early Stopping **")
        print("-----------------------------------------")
        print("Early Stopping Hook: Current loss: {}".format(str(last_loss)))
        print("Early Stopping Hook: Best loss: {}".format(str(self._best_loss)))

        if self._best_loss is None:
            self._best_loss = last_loss
            
        elif last_loss > self._best_loss:
            
            self._counter += 1
            print("Early Stopping Hook: No improvment! Counter: {}".format(self._counter))
            
            if self._counter == self._early_stopping_rounds:
                
                run_context.request_stop()
                print("Early Stopping Hook: Stop Requested: {}".format(run_context.stop_requested))
        else:
            
            self._best_loss = last_loss
            self._counter = 0
            
        print("************************")
        print("")

d. Define TrainSpec and EvaluSpec


In [13]:
train_spec = tf.estimator.TrainSpec(
    input_fn = lambda: tfrecods_input_fn(
        TRAIN_DATA_FILES_PATTERN,
        mode = tf.estimator.ModeKeys.TRAIN,
        num_epochs=hparams.num_epochs,
        batch_size=hparams.batch_size
    ),
    max_steps=hparams.max_steps,
    hooks=None
)

eval_spec = tf.estimator.EvalSpec(
    input_fn = lambda: tfrecods_input_fn(
        VALID_DATA_FILES_PATTERN,
        mode=tf.estimator.ModeKeys.EVAL,
        num_epochs=1,
        batch_size=hparams.batch_size
    ),
    exporters=[tf.estimator.LatestExporter(
        name="estimate",  # the name of the folder in which the model will be exported to under export
        serving_input_receiver_fn=csv_serving_input_fn,
        exports_to_keep=1,
        as_text=True)],
    steps=None,
    #hooks=[EarlyStoppingHook(15)],
    throttle_secs = EVAL_AFTER_SEC # evalute after each 15 training seconds!
)

e. Run Experiment via train_and_evaluate


In [14]:
if not RESUME_TRAINING:
    print("Removing previous artifacts...")
    shutil.rmtree(model_dir, ignore_errors=True)
else:
    print("Resuming training...") 

    
tf.logging.set_verbosity(tf.logging.INFO)

time_start = datetime.utcnow() 
print("Experiment started at {}".format(time_start.strftime("%H:%M:%S")))
print(".......................................") 

estimator = create_estimator(run_config, hparams, True)

tf.estimator.train_and_evaluate(
    estimator=estimator,
    train_spec=train_spec, 
    eval_spec=eval_spec
)

time_end = datetime.utcnow() 
print(".......................................")
print("Experiment finished at {}".format(time_end.strftime("%H:%M:%S")))
print("")
time_elapsed = time_end - time_start
print("Experiment elapsed time: {} seconds".format(time_elapsed.total_seconds()))


Removing previous artifacts...
Experiment started at 13:48:48
.......................................
INFO:tensorflow:Using config: {'_model_dir': 'trained_models/reg-model-06', '_tf_random_seed': 19830610, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x11b3ec358>, '_task_type': 'worker', '_task_id': 0, '_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}

*Estimator Type:
================
<class 'tensorflow.python.estimator.canned.dnn_linear_combined.DNNLinearCombinedRegressor'>

*deep columns:
==============
[_NumericColumn(key='x', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), _NumericColumn(key='y', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), _NumericColumn(key='x_2', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), _NumericColumn(key='y_2', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), _NumericColumn(key='xy', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), _NumericColumn(key='dist_xy', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), _NumericColumn(key='sin_x', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), _NumericColumn(key='cos_y', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), _EmbeddingColumn(categorical_column=_CrossedColumn(keys=(_BucketizedColumn(source_column=_NumericColumn(key='x', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), boundaries=(-3.0, -1.8, -0.6000000000000001, 0.5999999999999996, 1.7999999999999998, 3.0)), _BucketizedColumn(source_column=_NumericColumn(key='y', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), boundaries=(-3.0, -1.8, -0.6000000000000001, 0.5999999999999996, 1.7999999999999998, 3.0))), hash_bucket_size=36, hash_key=None), dimension=3, combiner='mean', initializer=<tensorflow.python.ops.init_ops.TruncatedNormal object at 0x107a42978>, ckpt_to_load_from=None, tensor_name_in_ckpt=None, max_norm=None, trainable=True), _IndicatorColumn(categorical_column=_VocabularyListCategoricalColumn(key='alpha', vocabulary_list=('ax01', 'ax02'), dtype=tf.string, default_value=-1, num_oov_buckets=0)), _IndicatorColumn(categorical_column=_VocabularyListCategoricalColumn(key='beta', vocabulary_list=('bx01', 'bx02'), dtype=tf.string, default_value=-1, num_oov_buckets=0))]

wide columns:
=============
[_VocabularyListCategoricalColumn(key='alpha', vocabulary_list=('ax01', 'ax02'), dtype=tf.string, default_value=-1, num_oov_buckets=0), _VocabularyListCategoricalColumn(key='beta', vocabulary_list=('bx01', 'bx02'), dtype=tf.string, default_value=-1, num_oov_buckets=0), _CrossedColumn(keys=(_VocabularyListCategoricalColumn(key='alpha', vocabulary_list=('ax01', 'ax02'), dtype=tf.string, default_value=-1, num_oov_buckets=0), _VocabularyListCategoricalColumn(key='beta', vocabulary_list=('bx01', 'bx02'), dtype=tf.string, default_value=-1, num_oov_buckets=0)), hash_bucket_size=4, hash_key=None), _CrossedColumn(keys=(_BucketizedColumn(source_column=_NumericColumn(key='x', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), boundaries=(-3.0, -1.8, -0.6000000000000001, 0.5999999999999996, 1.7999999999999998, 3.0)), _BucketizedColumn(source_column=_NumericColumn(key='y', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), boundaries=(-3.0, -1.8, -0.6000000000000001, 0.5999999999999996, 1.7999999999999998, 3.0))), hash_bucket_size=36, hash_key=None)]

INFO:tensorflow:Running training and evaluation locally (non-distributed).
INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after 15 secs (eval_spec.throttle_secs) or training is finished.

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Saving checkpoints for 1 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 159622.0, step = 1
INFO:tensorflow:global_step/sec: 115.565
INFO:tensorflow:loss = 139077.0, step = 101 (0.869 sec)
INFO:tensorflow:global_step/sec: 166.183
INFO:tensorflow:loss = 129130.0, step = 201 (0.606 sec)
INFO:tensorflow:global_step/sec: 184.358
INFO:tensorflow:loss = 107341.0, step = 301 (0.538 sec)
INFO:tensorflow:global_step/sec: 187.313
INFO:tensorflow:loss = 88040.5, step = 401 (0.533 sec)
INFO:tensorflow:global_step/sec: 201.453
INFO:tensorflow:loss = 68818.4, step = 501 (0.497 sec)
INFO:tensorflow:global_step/sec: 192.814
INFO:tensorflow:loss = 66175.3, step = 601 (0.519 sec)
INFO:tensorflow:global_step/sec: 199.504
INFO:tensorflow:loss = 66000.3, step = 701 (0.501 sec)
INFO:tensorflow:global_step/sec: 182.524
INFO:tensorflow:loss = 60487.0, step = 801 (0.553 sec)
INFO:tensorflow:global_step/sec: 147.5
INFO:tensorflow:loss = 54708.5, step = 901 (0.677 sec)
INFO:tensorflow:global_step/sec: 149.767
INFO:tensorflow:loss = 49084.7, step = 1001 (0.665 sec)
INFO:tensorflow:global_step/sec: 154.148
INFO:tensorflow:loss = 56067.9, step = 1101 (0.649 sec)
INFO:tensorflow:global_step/sec: 156.009
INFO:tensorflow:loss = 45968.8, step = 1201 (0.641 sec)
INFO:tensorflow:Saving checkpoints for 1211 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 48667.1.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:49:11
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-1211
INFO:tensorflow:Finished evaluation at 2017-11-25-13:49:12
INFO:tensorflow:Saving dict for global step 1211: average_loss = 106.544, global_step = 1211, loss = 53272.0
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-1211
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511617754'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-1211
INFO:tensorflow:Saving checkpoints for 1212 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 52684.3, step = 1212
INFO:tensorflow:global_step/sec: 137.914
INFO:tensorflow:loss = 57731.3, step = 1312 (0.726 sec)
INFO:tensorflow:global_step/sec: 199.554
INFO:tensorflow:loss = 47728.5, step = 1412 (0.501 sec)
INFO:tensorflow:global_step/sec: 196.491
INFO:tensorflow:loss = 44760.7, step = 1512 (0.508 sec)
INFO:tensorflow:global_step/sec: 190.21
INFO:tensorflow:loss = 50615.3, step = 1612 (0.526 sec)
INFO:tensorflow:global_step/sec: 198.84
INFO:tensorflow:loss = 41685.8, step = 1712 (0.502 sec)
INFO:tensorflow:global_step/sec: 197.194
INFO:tensorflow:loss = 42754.8, step = 1812 (0.508 sec)
INFO:tensorflow:global_step/sec: 193.783
INFO:tensorflow:loss = 46666.3, step = 1912 (0.515 sec)
INFO:tensorflow:global_step/sec: 200.002
INFO:tensorflow:loss = 46649.9, step = 2012 (0.500 sec)
INFO:tensorflow:global_step/sec: 202.967
INFO:tensorflow:loss = 43571.0, step = 2112 (0.493 sec)
INFO:tensorflow:global_step/sec: 200.899
INFO:tensorflow:loss = 42563.7, step = 2212 (0.497 sec)
INFO:tensorflow:global_step/sec: 200.237
INFO:tensorflow:loss = 49189.3, step = 2312 (0.501 sec)
INFO:tensorflow:global_step/sec: 198.474
INFO:tensorflow:loss = 43832.0, step = 2412 (0.504 sec)
INFO:tensorflow:global_step/sec: 200.55
INFO:tensorflow:loss = 38215.2, step = 2512 (0.498 sec)
INFO:tensorflow:global_step/sec: 195.869
INFO:tensorflow:loss = 41644.2, step = 2612 (0.511 sec)
INFO:tensorflow:global_step/sec: 193.279
INFO:tensorflow:loss = 41849.2, step = 2712 (0.517 sec)
INFO:tensorflow:Saving checkpoints for 2728 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 41730.8.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:49:36
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-2728
INFO:tensorflow:Finished evaluation at 2017-11-25-13:49:37
INFO:tensorflow:Saving dict for global step 2728: average_loss = 95.736, global_step = 2728, loss = 47868.0
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-2728
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511617778'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-2728
INFO:tensorflow:Saving checkpoints for 2729 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 49217.8, step = 2729
INFO:tensorflow:global_step/sec: 126.575
INFO:tensorflow:loss = 53845.1, step = 2829 (0.789 sec)
INFO:tensorflow:global_step/sec: 234.061
INFO:tensorflow:loss = 43269.9, step = 2929 (0.428 sec)
INFO:tensorflow:global_step/sec: 194.415
INFO:tensorflow:loss = 41291.9, step = 3029 (0.513 sec)
INFO:tensorflow:global_step/sec: 174.058
INFO:tensorflow:loss = 48649.3, step = 3129 (0.576 sec)
INFO:tensorflow:global_step/sec: 181.352
INFO:tensorflow:loss = 39758.8, step = 3229 (0.551 sec)
INFO:tensorflow:global_step/sec: 210.775
INFO:tensorflow:loss = 40536.8, step = 3329 (0.474 sec)
INFO:tensorflow:global_step/sec: 244.418
INFO:tensorflow:loss = 43709.2, step = 3429 (0.408 sec)
INFO:tensorflow:global_step/sec: 239.019
INFO:tensorflow:loss = 44780.2, step = 3529 (0.419 sec)
INFO:tensorflow:global_step/sec: 207.444
INFO:tensorflow:loss = 41581.0, step = 3629 (0.484 sec)
INFO:tensorflow:global_step/sec: 187.426
INFO:tensorflow:loss = 40304.6, step = 3729 (0.532 sec)
INFO:tensorflow:global_step/sec: 180.477
INFO:tensorflow:loss = 47941.6, step = 3829 (0.555 sec)
INFO:tensorflow:global_step/sec: 188.505
INFO:tensorflow:loss = 42714.3, step = 3929 (0.530 sec)
INFO:tensorflow:global_step/sec: 190.218
INFO:tensorflow:loss = 37152.8, step = 4029 (0.528 sec)
INFO:tensorflow:global_step/sec: 153.264
INFO:tensorflow:loss = 40304.6, step = 4129 (0.652 sec)
INFO:tensorflow:global_step/sec: 180.345
INFO:tensorflow:loss = 40580.1, step = 4229 (0.553 sec)
INFO:tensorflow:Saving checkpoints for 4292 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 42083.8.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:50:00
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-4292
INFO:tensorflow:Finished evaluation at 2017-11-25-13:50:01
INFO:tensorflow:Saving dict for global step 4292: average_loss = 94.103, global_step = 4292, loss = 47051.5
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-4292
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511617803'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-4292
INFO:tensorflow:Saving checkpoints for 4293 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 48104.3, step = 4293
INFO:tensorflow:global_step/sec: 110.02
INFO:tensorflow:loss = 52260.1, step = 4393 (0.913 sec)
INFO:tensorflow:global_step/sec: 171.573
INFO:tensorflow:loss = 42716.5, step = 4493 (0.582 sec)
INFO:tensorflow:global_step/sec: 189.716
INFO:tensorflow:loss = 40238.1, step = 4593 (0.526 sec)
INFO:tensorflow:global_step/sec: 199.554
INFO:tensorflow:loss = 47930.6, step = 4693 (0.500 sec)
INFO:tensorflow:global_step/sec: 199.476
INFO:tensorflow:loss = 39066.1, step = 4793 (0.503 sec)
INFO:tensorflow:global_step/sec: 211.603
INFO:tensorflow:loss = 39717.1, step = 4893 (0.471 sec)
INFO:tensorflow:global_step/sec: 156.171
INFO:tensorflow:loss = 42879.6, step = 4993 (0.641 sec)
INFO:tensorflow:global_step/sec: 188.523
INFO:tensorflow:loss = 44072.4, step = 5093 (0.531 sec)
INFO:tensorflow:global_step/sec: 212.28
INFO:tensorflow:loss = 40898.3, step = 5193 (0.470 sec)
INFO:tensorflow:global_step/sec: 203.528
INFO:tensorflow:loss = 39662.0, step = 5293 (0.492 sec)
INFO:tensorflow:global_step/sec: 210.565
INFO:tensorflow:loss = 47551.8, step = 5393 (0.475 sec)
INFO:tensorflow:Saving checkpoints for 5427 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 38144.6.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:50:26
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-5427
INFO:tensorflow:Finished evaluation at 2017-11-25-13:50:27
INFO:tensorflow:Saving dict for global step 5427: average_loss = 94.0572, global_step = 5427, loss = 47028.6
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-5427
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511617828'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-5427
INFO:tensorflow:Saving checkpoints for 5428 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 47880.6, step = 5428
INFO:tensorflow:global_step/sec: 94.089
INFO:tensorflow:loss = 51792.2, step = 5528 (1.065 sec)
INFO:tensorflow:global_step/sec: 184.034
INFO:tensorflow:loss = 42491.6, step = 5628 (0.540 sec)
INFO:tensorflow:global_step/sec: 176.747
INFO:tensorflow:loss = 40018.4, step = 5728 (0.565 sec)
INFO:tensorflow:global_step/sec: 167.421
INFO:tensorflow:loss = 47734.9, step = 5828 (0.599 sec)
INFO:tensorflow:global_step/sec: 179.948
INFO:tensorflow:loss = 38679.7, step = 5928 (0.558 sec)
INFO:tensorflow:global_step/sec: 189.015
INFO:tensorflow:loss = 39437.9, step = 6028 (0.526 sec)
INFO:tensorflow:global_step/sec: 203.177
INFO:tensorflow:loss = 42512.3, step = 6128 (0.497 sec)
INFO:tensorflow:global_step/sec: 199.452
INFO:tensorflow:loss = 43787.1, step = 6228 (0.497 sec)
INFO:tensorflow:global_step/sec: 215.91
INFO:tensorflow:loss = 40581.8, step = 6328 (0.465 sec)
INFO:tensorflow:Saving checkpoints for 6375 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 51887.2.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:50:51
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-6375
INFO:tensorflow:Finished evaluation at 2017-11-25-13:50:52
INFO:tensorflow:Saving dict for global step 6375: average_loss = 94.0575, global_step = 6375, loss = 47028.7
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-6375
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511617852'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-6375
INFO:tensorflow:Saving checkpoints for 6376 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 47785.1, step = 6376
INFO:tensorflow:global_step/sec: 156.142
INFO:tensorflow:loss = 51610.9, step = 6476 (0.641 sec)
INFO:tensorflow:global_step/sec: 245.906
INFO:tensorflow:loss = 42362.8, step = 6576 (0.407 sec)
INFO:tensorflow:global_step/sec: 242.25
INFO:tensorflow:loss = 39857.3, step = 6676 (0.412 sec)
INFO:tensorflow:global_step/sec: 254.732
INFO:tensorflow:loss = 47615.8, step = 6776 (0.393 sec)
INFO:tensorflow:global_step/sec: 248.463
INFO:tensorflow:loss = 38528.7, step = 6876 (0.402 sec)
INFO:tensorflow:global_step/sec: 236.017
INFO:tensorflow:loss = 39255.3, step = 6976 (0.424 sec)
INFO:tensorflow:global_step/sec: 248.995
INFO:tensorflow:loss = 42299.6, step = 7076 (0.402 sec)
INFO:tensorflow:global_step/sec: 241.76
INFO:tensorflow:loss = 43587.3, step = 7176 (0.414 sec)
INFO:tensorflow:global_step/sec: 248.009
INFO:tensorflow:loss = 40360.6, step = 7276 (0.403 sec)
INFO:tensorflow:global_step/sec: 245.305
INFO:tensorflow:loss = 39294.0, step = 7376 (0.408 sec)
INFO:tensorflow:global_step/sec: 241.605
INFO:tensorflow:loss = 47278.1, step = 7476 (0.415 sec)
INFO:tensorflow:global_step/sec: 235.677
INFO:tensorflow:loss = 42108.9, step = 7576 (0.424 sec)
INFO:tensorflow:global_step/sec: 246.509
INFO:tensorflow:loss = 37058.1, step = 7676 (0.406 sec)
INFO:tensorflow:global_step/sec: 250.619
INFO:tensorflow:loss = 39617.8, step = 7776 (0.399 sec)
INFO:tensorflow:global_step/sec: 251.301
INFO:tensorflow:loss = 40308.3, step = 7876 (0.397 sec)
INFO:tensorflow:global_step/sec: 249.84
INFO:tensorflow:loss = 49720.0, step = 7976 (0.401 sec)
INFO:tensorflow:global_step/sec: 256.469
INFO:tensorflow:loss = 37273.1, step = 8076 (0.390 sec)
INFO:tensorflow:global_step/sec: 248.136
INFO:tensorflow:loss = 46591.6, step = 8176 (0.403 sec)
INFO:tensorflow:global_step/sec: 247.399
INFO:tensorflow:loss = 37848.4, step = 8276 (0.404 sec)
INFO:tensorflow:global_step/sec: 249.299
INFO:tensorflow:loss = 49566.2, step = 8376 (0.401 sec)
INFO:tensorflow:global_step/sec: 241.049
INFO:tensorflow:loss = 50425.0, step = 8476 (0.415 sec)
INFO:tensorflow:Saving checkpoints for 8525 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 41912.6.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:51:13
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-8525
INFO:tensorflow:Finished evaluation at 2017-11-25-13:51:14
INFO:tensorflow:Saving dict for global step 8525: average_loss = 94.0083, global_step = 8525, loss = 47004.1
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-8525
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511617875'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-8525
INFO:tensorflow:Saving checkpoints for 8526 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 47604.9, step = 8526
INFO:tensorflow:global_step/sec: 127.568
INFO:tensorflow:loss = 51467.5, step = 8626 (0.785 sec)
INFO:tensorflow:global_step/sec: 249.957
INFO:tensorflow:loss = 42135.0, step = 8726 (0.399 sec)
INFO:tensorflow:global_step/sec: 234.288
INFO:tensorflow:loss = 39552.5, step = 8826 (0.427 sec)
INFO:tensorflow:global_step/sec: 217.791
INFO:tensorflow:loss = 47384.7, step = 8926 (0.460 sec)
INFO:tensorflow:global_step/sec: 213.817
INFO:tensorflow:loss = 38228.4, step = 9026 (0.467 sec)
INFO:tensorflow:global_step/sec: 263.503
INFO:tensorflow:loss = 38941.3, step = 9126 (0.380 sec)
INFO:tensorflow:global_step/sec: 266.039
INFO:tensorflow:loss = 41861.9, step = 9226 (0.375 sec)
INFO:tensorflow:global_step/sec: 266.104
INFO:tensorflow:loss = 43279.9, step = 9326 (0.378 sec)
INFO:tensorflow:global_step/sec: 268.896
INFO:tensorflow:loss = 39999.9, step = 9426 (0.370 sec)
INFO:tensorflow:global_step/sec: 269.247
INFO:tensorflow:loss = 39155.7, step = 9526 (0.371 sec)
INFO:tensorflow:global_step/sec: 270.424
INFO:tensorflow:loss = 47052.7, step = 9626 (0.370 sec)
INFO:tensorflow:global_step/sec: 267.118
INFO:tensorflow:loss = 42009.6, step = 9726 (0.374 sec)
INFO:tensorflow:global_step/sec: 274.54
INFO:tensorflow:loss = 37124.2, step = 9826 (0.364 sec)
INFO:tensorflow:global_step/sec: 271.244
INFO:tensorflow:loss = 39477.1, step = 9926 (0.368 sec)
INFO:tensorflow:global_step/sec: 268.371
INFO:tensorflow:loss = 40266.2, step = 10026 (0.373 sec)
INFO:tensorflow:global_step/sec: 269.783
INFO:tensorflow:loss = 49461.3, step = 10126 (0.371 sec)
INFO:tensorflow:global_step/sec: 266.796
INFO:tensorflow:loss = 37287.8, step = 10226 (0.375 sec)
INFO:tensorflow:global_step/sec: 272.567
INFO:tensorflow:loss = 46565.5, step = 10326 (0.367 sec)
INFO:tensorflow:global_step/sec: 266.213
INFO:tensorflow:loss = 37719.7, step = 10426 (0.376 sec)
INFO:tensorflow:Saving checkpoints for 10502 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 46747.1.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:51:36
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-10502
INFO:tensorflow:Finished evaluation at 2017-11-25-13:51:37
INFO:tensorflow:Saving dict for global step 10502: average_loss = 93.9545, global_step = 10502, loss = 46977.2
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-10502
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511617898'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-10502
INFO:tensorflow:Saving checkpoints for 10503 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 47342.7, step = 10503
INFO:tensorflow:global_step/sec: 127.549
INFO:tensorflow:loss = 51327.8, step = 10603 (0.785 sec)
INFO:tensorflow:global_step/sec: 159.806
INFO:tensorflow:loss = 42009.0, step = 10703 (0.629 sec)
INFO:tensorflow:global_step/sec: 190.088
INFO:tensorflow:loss = 39328.9, step = 10803 (0.523 sec)
INFO:tensorflow:global_step/sec: 203.162
INFO:tensorflow:loss = 47166.4, step = 10903 (0.495 sec)
INFO:tensorflow:global_step/sec: 159.804
INFO:tensorflow:loss = 37983.7, step = 11003 (0.628 sec)
INFO:tensorflow:global_step/sec: 185.889
INFO:tensorflow:loss = 38733.7, step = 11103 (0.534 sec)
INFO:tensorflow:global_step/sec: 198.454
INFO:tensorflow:loss = 41631.9, step = 11203 (0.505 sec)
INFO:tensorflow:global_step/sec: 174.911
INFO:tensorflow:loss = 43145.7, step = 11303 (0.573 sec)
INFO:tensorflow:global_step/sec: 162.598
INFO:tensorflow:loss = 39779.1, step = 11403 (0.614 sec)
INFO:tensorflow:global_step/sec: 171.732
INFO:tensorflow:loss = 39065.7, step = 11503 (0.580 sec)
INFO:tensorflow:global_step/sec: 224.6
INFO:tensorflow:loss = 46871.2, step = 11603 (0.444 sec)
INFO:tensorflow:global_step/sec: 182.261
INFO:tensorflow:loss = 41940.3, step = 11703 (0.550 sec)
INFO:tensorflow:global_step/sec: 199.088
INFO:tensorflow:loss = 37163.0, step = 11803 (0.503 sec)
INFO:tensorflow:Saving checkpoints for 11836 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 39643.0.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:52:01
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-11836
INFO:tensorflow:Finished evaluation at 2017-11-25-13:52:02
INFO:tensorflow:Saving dict for global step 11836: average_loss = 93.9688, global_step = 11836, loss = 46984.4
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-11836
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511617923'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-11836
INFO:tensorflow:Saving checkpoints for 11837 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 47268.9, step = 11837
INFO:tensorflow:global_step/sec: 111.747
INFO:tensorflow:loss = 51264.5, step = 11937 (0.895 sec)
INFO:tensorflow:global_step/sec: 212.818
INFO:tensorflow:loss = 41930.6, step = 12037 (0.471 sec)
INFO:tensorflow:global_step/sec: 174.46
INFO:tensorflow:loss = 39180.2, step = 12137 (0.571 sec)
INFO:tensorflow:global_step/sec: 256.187
INFO:tensorflow:loss = 47038.4, step = 12237 (0.390 sec)
INFO:tensorflow:global_step/sec: 197.238
INFO:tensorflow:loss = 37826.9, step = 12337 (0.507 sec)
INFO:tensorflow:global_step/sec: 245.259
INFO:tensorflow:loss = 38612.9, step = 12437 (0.409 sec)
INFO:tensorflow:global_step/sec: 215.338
INFO:tensorflow:loss = 41539.3, step = 12537 (0.463 sec)
INFO:tensorflow:global_step/sec: 243.713
INFO:tensorflow:loss = 43071.8, step = 12637 (0.411 sec)
INFO:tensorflow:global_step/sec: 208.15
INFO:tensorflow:loss = 39650.1, step = 12737 (0.483 sec)
INFO:tensorflow:global_step/sec: 239.352
INFO:tensorflow:loss = 39017.3, step = 12837 (0.414 sec)
INFO:tensorflow:global_step/sec: 222.681
INFO:tensorflow:loss = 46787.9, step = 12937 (0.451 sec)
INFO:tensorflow:global_step/sec: 251.966
INFO:tensorflow:loss = 41910.2, step = 13037 (0.395 sec)
INFO:tensorflow:global_step/sec: 241.908
INFO:tensorflow:loss = 37183.5, step = 13137 (0.414 sec)
INFO:tensorflow:global_step/sec: 230.151
INFO:tensorflow:loss = 39305.1, step = 13237 (0.434 sec)
INFO:tensorflow:global_step/sec: 204.471
INFO:tensorflow:loss = 40122.2, step = 13337 (0.491 sec)
INFO:tensorflow:Saving checkpoints for 13373 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 42051.4.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:52:25
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-13373
INFO:tensorflow:Finished evaluation at 2017-11-25-13:52:26
INFO:tensorflow:Saving dict for global step 13373: average_loss = 93.9417, global_step = 13373, loss = 46970.9
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-13373
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511617947'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-13373
INFO:tensorflow:Saving checkpoints for 13374 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 47237.1, step = 13374
INFO:tensorflow:global_step/sec: 151.1
INFO:tensorflow:loss = 51208.4, step = 13474 (0.662 sec)
INFO:tensorflow:global_step/sec: 237.923
INFO:tensorflow:loss = 41852.4, step = 13574 (0.421 sec)
INFO:tensorflow:global_step/sec: 238.956
INFO:tensorflow:loss = 39035.0, step = 13674 (0.419 sec)
INFO:tensorflow:global_step/sec: 233.847
INFO:tensorflow:loss = 46916.6, step = 13774 (0.427 sec)
INFO:tensorflow:global_step/sec: 200.548
INFO:tensorflow:loss = 37676.0, step = 13874 (0.500 sec)
INFO:tensorflow:global_step/sec: 201.453
INFO:tensorflow:loss = 38498.4, step = 13974 (0.496 sec)
INFO:tensorflow:global_step/sec: 225.055
INFO:tensorflow:loss = 41472.4, step = 14074 (0.444 sec)
INFO:tensorflow:global_step/sec: 243.673
INFO:tensorflow:loss = 42983.0, step = 14174 (0.410 sec)
INFO:tensorflow:global_step/sec: 226.952
INFO:tensorflow:loss = 39537.6, step = 14274 (0.442 sec)
INFO:tensorflow:global_step/sec: 210.133
INFO:tensorflow:loss = 38981.2, step = 14374 (0.475 sec)
INFO:tensorflow:global_step/sec: 231.87
INFO:tensorflow:loss = 46710.5, step = 14474 (0.432 sec)
INFO:tensorflow:global_step/sec: 239.002
INFO:tensorflow:loss = 41902.4, step = 14574 (0.418 sec)
INFO:tensorflow:global_step/sec: 235.502
INFO:tensorflow:loss = 37200.5, step = 14674 (0.425 sec)
INFO:tensorflow:global_step/sec: 230.526
INFO:tensorflow:loss = 39260.6, step = 14774 (0.433 sec)
INFO:tensorflow:global_step/sec: 202.362
INFO:tensorflow:loss = 40037.1, step = 14874 (0.496 sec)
INFO:tensorflow:global_step/sec: 220.092
INFO:tensorflow:loss = 49181.4, step = 14974 (0.452 sec)
INFO:tensorflow:global_step/sec: 239.461
INFO:tensorflow:loss = 37152.5, step = 15074 (0.418 sec)
INFO:tensorflow:Saving checkpoints for 15133 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 42636.0.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:52:48
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-15133
INFO:tensorflow:Finished evaluation at 2017-11-25-13:52:49
INFO:tensorflow:Saving dict for global step 15133: average_loss = 93.8878, global_step = 15133, loss = 46943.9
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-15133
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511617971'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-15133
INFO:tensorflow:Saving checkpoints for 15134 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 47031.7, step = 15134
INFO:tensorflow:global_step/sec: 125.867
INFO:tensorflow:loss = 51192.6, step = 15234 (0.797 sec)
INFO:tensorflow:global_step/sec: 213.494
INFO:tensorflow:loss = 41739.0, step = 15334 (0.467 sec)
INFO:tensorflow:global_step/sec: 233.127
INFO:tensorflow:loss = 38875.4, step = 15434 (0.429 sec)
INFO:tensorflow:global_step/sec: 204.753
INFO:tensorflow:loss = 46876.4, step = 15534 (0.489 sec)
INFO:tensorflow:global_step/sec: 195.941
INFO:tensorflow:loss = 37494.9, step = 15634 (0.511 sec)
INFO:tensorflow:global_step/sec: 227.864
INFO:tensorflow:loss = 38406.6, step = 15734 (0.438 sec)
INFO:tensorflow:global_step/sec: 220.162
INFO:tensorflow:loss = 41447.8, step = 15834 (0.457 sec)
INFO:tensorflow:global_step/sec: 206.822
INFO:tensorflow:loss = 42867.9, step = 15934 (0.483 sec)
INFO:tensorflow:global_step/sec: 206.549
INFO:tensorflow:loss = 39422.4, step = 16034 (0.483 sec)
INFO:tensorflow:global_step/sec: 180.991
INFO:tensorflow:loss = 38959.7, step = 16134 (0.557 sec)
INFO:tensorflow:global_step/sec: 201.021
INFO:tensorflow:loss = 46665.4, step = 16234 (0.493 sec)
INFO:tensorflow:global_step/sec: 230.008
INFO:tensorflow:loss = 41902.4, step = 16334 (0.435 sec)
INFO:tensorflow:global_step/sec: 213.353
INFO:tensorflow:loss = 37164.9, step = 16434 (0.469 sec)
INFO:tensorflow:global_step/sec: 217.875
INFO:tensorflow:loss = 39228.1, step = 16534 (0.458 sec)
INFO:tensorflow:global_step/sec: 198.838
INFO:tensorflow:loss = 39895.4, step = 16634 (0.503 sec)
INFO:tensorflow:Saving checkpoints for 16669 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 48297.2.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:53:11
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-16669
INFO:tensorflow:Finished evaluation at 2017-11-25-13:53:12
INFO:tensorflow:Saving dict for global step 16669: average_loss = 93.9069, global_step = 16669, loss = 46953.4
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-16669
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511617993'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-16669
INFO:tensorflow:Saving checkpoints for 16670 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 46963.4, step = 16670
INFO:tensorflow:global_step/sec: 133.523
INFO:tensorflow:loss = 51230.0, step = 16770 (0.751 sec)
INFO:tensorflow:global_step/sec: 218.464
INFO:tensorflow:loss = 41598.7, step = 16870 (0.460 sec)
INFO:tensorflow:global_step/sec: 194.937
INFO:tensorflow:loss = 38746.8, step = 16970 (0.521 sec)
INFO:tensorflow:global_step/sec: 183.881
INFO:tensorflow:loss = 46878.9, step = 17070 (0.534 sec)
INFO:tensorflow:global_step/sec: 198.01
INFO:tensorflow:loss = 37308.6, step = 17170 (0.505 sec)
INFO:tensorflow:global_step/sec: 215.998
INFO:tensorflow:loss = 38352.3, step = 17270 (0.463 sec)
INFO:tensorflow:global_step/sec: 211.169
INFO:tensorflow:loss = 41431.7, step = 17370 (0.473 sec)
INFO:tensorflow:global_step/sec: 189.167
INFO:tensorflow:loss = 42698.5, step = 17470 (0.533 sec)
INFO:tensorflow:global_step/sec: 193.745
INFO:tensorflow:loss = 39298.2, step = 17570 (0.513 sec)
INFO:tensorflow:global_step/sec: 194.341
INFO:tensorflow:loss = 38930.8, step = 17670 (0.515 sec)
INFO:tensorflow:global_step/sec: 200.482
INFO:tensorflow:loss = 46636.0, step = 17770 (0.499 sec)
INFO:tensorflow:global_step/sec: 199.258
INFO:tensorflow:loss = 41947.4, step = 17870 (0.502 sec)
INFO:tensorflow:global_step/sec: 205.231
INFO:tensorflow:loss = 37117.2, step = 17970 (0.487 sec)
INFO:tensorflow:global_step/sec: 215.419
INFO:tensorflow:loss = 39161.3, step = 18070 (0.465 sec)
INFO:tensorflow:Saving checkpoints for 18150 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 41399.9.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:53:34
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-18150
INFO:tensorflow:Finished evaluation at 2017-11-25-13:53:35
INFO:tensorflow:Saving dict for global step 18150: average_loss = 94.1038, global_step = 18150, loss = 47051.9
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-18150
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511618016'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-18150
INFO:tensorflow:Saving checkpoints for 18151 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 46892.8, step = 18151
INFO:tensorflow:global_step/sec: 126.368
INFO:tensorflow:loss = 51278.9, step = 18251 (0.794 sec)
INFO:tensorflow:global_step/sec: 186.305
INFO:tensorflow:loss = 41444.1, step = 18351 (0.536 sec)
INFO:tensorflow:global_step/sec: 188.188
INFO:tensorflow:loss = 38644.7, step = 18451 (0.532 sec)
INFO:tensorflow:global_step/sec: 192.483
INFO:tensorflow:loss = 46854.7, step = 18551 (0.519 sec)
INFO:tensorflow:global_step/sec: 175.224
INFO:tensorflow:loss = 37133.9, step = 18651 (0.573 sec)
INFO:tensorflow:global_step/sec: 170.764
INFO:tensorflow:loss = 38311.2, step = 18751 (0.584 sec)
INFO:tensorflow:global_step/sec: 183.077
INFO:tensorflow:loss = 41407.2, step = 18851 (0.545 sec)
INFO:tensorflow:global_step/sec: 191.721
INFO:tensorflow:loss = 42541.1, step = 18951 (0.521 sec)
INFO:tensorflow:global_step/sec: 186.553
INFO:tensorflow:loss = 39187.5, step = 19051 (0.537 sec)
INFO:tensorflow:global_step/sec: 177.255
INFO:tensorflow:loss = 38855.3, step = 19151 (0.562 sec)
INFO:tensorflow:global_step/sec: 192.083
INFO:tensorflow:loss = 46611.3, step = 19251 (0.520 sec)
INFO:tensorflow:global_step/sec: 193.399
INFO:tensorflow:loss = 41989.2, step = 19351 (0.519 sec)
INFO:tensorflow:Saving checkpoints for 19421 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 40301.8.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:53:58
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-19421
INFO:tensorflow:Finished evaluation at 2017-11-25-13:54:00
INFO:tensorflow:Saving dict for global step 19421: average_loss = 93.9054, global_step = 19421, loss = 46952.7
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-19421
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511618043'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-19421
INFO:tensorflow:Saving checkpoints for 19422 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 46691.9, step = 19422
INFO:tensorflow:global_step/sec: 136.728
INFO:tensorflow:loss = 51302.3, step = 19522 (0.730 sec)
INFO:tensorflow:global_step/sec: 175.428
INFO:tensorflow:loss = 41326.7, step = 19622 (0.575 sec)
INFO:tensorflow:global_step/sec: 213.344
INFO:tensorflow:loss = 38565.2, step = 19722 (0.463 sec)
INFO:tensorflow:global_step/sec: 238.834
INFO:tensorflow:loss = 46835.8, step = 19822 (0.419 sec)
INFO:tensorflow:global_step/sec: 221.126
INFO:tensorflow:loss = 37008.6, step = 19922 (0.452 sec)
INFO:tensorflow:global_step/sec: 204.924
INFO:tensorflow:loss = 38286.5, step = 20022 (0.488 sec)
INFO:tensorflow:global_step/sec: 176.808
INFO:tensorflow:loss = 41387.7, step = 20122 (0.567 sec)
INFO:tensorflow:global_step/sec: 182.058
INFO:tensorflow:loss = 42452.8, step = 20222 (0.549 sec)
INFO:tensorflow:global_step/sec: 202.122
INFO:tensorflow:loss = 39100.6, step = 20322 (0.496 sec)
INFO:tensorflow:global_step/sec: 194.388
INFO:tensorflow:loss = 38789.9, step = 20422 (0.513 sec)
INFO:tensorflow:global_step/sec: 207.114
INFO:tensorflow:loss = 46597.5, step = 20522 (0.484 sec)
INFO:tensorflow:global_step/sec: 207.081
INFO:tensorflow:loss = 42005.0, step = 20622 (0.481 sec)
INFO:tensorflow:global_step/sec: 220.951
INFO:tensorflow:loss = 37058.2, step = 20722 (0.453 sec)
INFO:tensorflow:global_step/sec: 207.426
INFO:tensorflow:loss = 39019.1, step = 20822 (0.485 sec)
INFO:tensorflow:global_step/sec: 183.387
INFO:tensorflow:loss = 39813.3, step = 20922 (0.543 sec)
INFO:tensorflow:Saving checkpoints for 21004 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 39192.1.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:54:25
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-21004
INFO:tensorflow:Finished evaluation at 2017-11-25-13:54:26
INFO:tensorflow:Saving dict for global step 21004: average_loss = 93.884, global_step = 21004, loss = 46942.0
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-21004
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511618068'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-21004
INFO:tensorflow:Saving checkpoints for 21005 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 46608.7, step = 21005
INFO:tensorflow:global_step/sec: 132.24
INFO:tensorflow:loss = 51307.1, step = 21105 (0.756 sec)
INFO:tensorflow:global_step/sec: 180.324
INFO:tensorflow:loss = 41223.7, step = 21205 (0.555 sec)
INFO:tensorflow:global_step/sec: 164.787
INFO:tensorflow:loss = 38476.7, step = 21305 (0.605 sec)
INFO:tensorflow:global_step/sec: 183.46
INFO:tensorflow:loss = 46836.1, step = 21405 (0.545 sec)
INFO:tensorflow:global_step/sec: 166.413
INFO:tensorflow:loss = 36904.0, step = 21505 (0.601 sec)
INFO:tensorflow:Saving checkpoints for 21595 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 38124.1.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:54:52
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-21595
INFO:tensorflow:Finished evaluation at 2017-11-25-13:54:53
INFO:tensorflow:Saving dict for global step 21595: average_loss = 94.0207, global_step = 21595, loss = 47010.3
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-21595
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511618094'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-21595
INFO:tensorflow:Saving checkpoints for 21596 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 46673.3, step = 21596
INFO:tensorflow:global_step/sec: 130.212
INFO:tensorflow:loss = 51308.6, step = 21696 (0.772 sec)
INFO:tensorflow:global_step/sec: 187.606
INFO:tensorflow:loss = 41191.1, step = 21796 (0.532 sec)
INFO:tensorflow:global_step/sec: 181.059
INFO:tensorflow:loss = 38449.7, step = 21896 (0.553 sec)
INFO:tensorflow:global_step/sec: 186.269
INFO:tensorflow:loss = 46827.7, step = 21996 (0.534 sec)
INFO:tensorflow:global_step/sec: 174.995
INFO:tensorflow:loss = 36874.7, step = 22096 (0.573 sec)
INFO:tensorflow:global_step/sec: 185.85
INFO:tensorflow:loss = 38287.6, step = 22196 (0.538 sec)
INFO:tensorflow:global_step/sec: 177.696
INFO:tensorflow:loss = 41367.1, step = 22296 (0.561 sec)
INFO:tensorflow:global_step/sec: 192.964
INFO:tensorflow:loss = 42352.4, step = 22396 (0.520 sec)
INFO:tensorflow:global_step/sec: 186.506
INFO:tensorflow:loss = 38988.6, step = 22496 (0.536 sec)
INFO:tensorflow:global_step/sec: 132.187
INFO:tensorflow:loss = 38706.8, step = 22596 (0.762 sec)
INFO:tensorflow:global_step/sec: 146.225
INFO:tensorflow:loss = 46588.4, step = 22696 (0.678 sec)
INFO:tensorflow:global_step/sec: 148.428
INFO:tensorflow:loss = 42030.3, step = 22796 (0.677 sec)
INFO:tensorflow:Saving checkpoints for 22804 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 34922.1.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:55:17
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-22804
INFO:tensorflow:Finished evaluation at 2017-11-25-13:55:18
INFO:tensorflow:Saving dict for global step 22804: average_loss = 94.0115, global_step = 22804, loss = 47005.7
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-22804
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511618120'/saved_model.pbtxt"

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 500
Epoch Count: 1000
Mode: train
Shuffle: True
================

INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-22804
INFO:tensorflow:Saving checkpoints for 22805 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:loss = 46630.1, step = 22805
INFO:tensorflow:global_step/sec: 149.79
INFO:tensorflow:loss = 51299.7, step = 22905 (0.668 sec)
INFO:tensorflow:global_step/sec: 187.497
INFO:tensorflow:loss = 41116.6, step = 23005 (0.534 sec)
INFO:tensorflow:global_step/sec: 185.245
INFO:tensorflow:loss = 38394.8, step = 23105 (0.540 sec)
INFO:tensorflow:global_step/sec: 201.303
INFO:tensorflow:loss = 46826.9, step = 23205 (0.497 sec)
INFO:tensorflow:global_step/sec: 179.158
INFO:tensorflow:loss = 36817.7, step = 23305 (0.559 sec)
INFO:tensorflow:global_step/sec: 163.16
INFO:tensorflow:loss = 38293.8, step = 23405 (0.611 sec)
INFO:tensorflow:global_step/sec: 174.99
INFO:tensorflow:loss = 41362.9, step = 23505 (0.572 sec)
INFO:tensorflow:global_step/sec: 224.302
INFO:tensorflow:loss = 42302.6, step = 23605 (0.445 sec)
INFO:tensorflow:global_step/sec: 198.445
INFO:tensorflow:loss = 38936.8, step = 23705 (0.505 sec)
INFO:tensorflow:global_step/sec: 197.97
INFO:tensorflow:loss = 38662.9, step = 23805 (0.505 sec)
INFO:tensorflow:global_step/sec: 186.648
INFO:tensorflow:loss = 46580.6, step = 23905 (0.536 sec)
INFO:tensorflow:Saving checkpoints for 24000 into trained_models/reg-model-06/model.ckpt.
INFO:tensorflow:Loss for final step: 39168.3.

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 500
Epoch Count: 1
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:55:40
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-24000
INFO:tensorflow:Finished evaluation at 2017-11-25-13:55:41
INFO:tensorflow:Saving dict for global step 24000: average_loss = 94.0032, global_step = 24000, loss = 47001.6
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-24000
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: b"trained_models/reg-model-06/export/estimate/temp-b'1511618142'/saved_model.pbtxt"
.......................................
Experiment finished at 13:55:45

Experiment elapsed time: 417.037179 seconds

6. Evaluate the Model


In [15]:
TRAIN_SIZE = 12000
VALID_SIZE = 3000
TEST_SIZE = 5000
train_input_fn = lambda: tfrecods_input_fn(files_name_pattern= TRAIN_DATA_FILES_PATTERN, 
                                      mode= tf.estimator.ModeKeys.EVAL,
                                      batch_size= TRAIN_SIZE)

valid_input_fn = lambda: tfrecods_input_fn(files_name_pattern= VALID_DATA_FILES_PATTERN, 
                                      mode= tf.estimator.ModeKeys.EVAL,
                                      batch_size= VALID_SIZE)

test_input_fn = lambda: tfrecods_input_fn(files_name_pattern= TEST_DATA_FILES_PATTERN, 
                                      mode= tf.estimator.ModeKeys.EVAL,
                                      batch_size= TEST_SIZE)

estimator = create_estimator(run_config, hparams)

train_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
train_rmse = round(math.sqrt(train_results["average_loss"]),5)
print()
print("############################################################################################")
print("# Train RMSE: {} - {}".format(train_rmse, train_results))
print("############################################################################################")

valid_results = estimator.evaluate(input_fn=valid_input_fn, steps=1)
valid_rmse = round(math.sqrt(valid_results["average_loss"]),5)
print()
print("############################################################################################")
print("# Valid RMSE: {} - {}".format(valid_rmse,valid_results))
print("############################################################################################")

test_results = estimator.evaluate(input_fn=test_input_fn, steps=1)
test_rmse = round(math.sqrt(test_results["average_loss"]),5)
print()
print("############################################################################################")
print("# Test RMSE: {} - {}".format(test_rmse, test_results))
print("############################################################################################")


INFO:tensorflow:Using config: {'_model_dir': 'trained_models/reg-model-06', '_tf_random_seed': 19830610, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x11b3ec358>, '_task_type': 'worker', '_task_id': 0, '_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}

* data input_fn:
================
Input file(s): data/train-*.tfrecords
Batch size: 12000
Epoch Count: None
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:55:47
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-24000
INFO:tensorflow:Evaluation [1/1]
INFO:tensorflow:Finished evaluation at 2017-11-25-13:55:49
INFO:tensorflow:Saving dict for global step 24000: average_loss = 83.6133, global_step = 24000, loss = 1.00336e+06

############################################################################################
# Train RMSE: 9.14403 - {'average_loss': 83.613258, 'loss': 1003359.1, 'global_step': 24000}
############################################################################################

* data input_fn:
================
Input file(s): data/valid-*.tfrecords
Batch size: 3000
Epoch Count: None
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:55:51
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-24000
INFO:tensorflow:Evaluation [1/1]
INFO:tensorflow:Finished evaluation at 2017-11-25-13:55:52
INFO:tensorflow:Saving dict for global step 24000: average_loss = 94.0032, global_step = 24000, loss = 282010.0

############################################################################################
# Valid RMSE: 9.69553 - {'average_loss': 94.00322, 'loss': 282009.66, 'global_step': 24000}
############################################################################################

* data input_fn:
================
Input file(s): data/test-*.tfrecords
Batch size: 5000
Epoch Count: None
Mode: eval
Shuffle: False
================

INFO:tensorflow:Starting evaluation at 2017-11-25-13:55:54
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-24000
INFO:tensorflow:Evaluation [1/1]
INFO:tensorflow:Finished evaluation at 2017-11-25-13:55:55
INFO:tensorflow:Saving dict for global step 24000: average_loss = 94.6877, global_step = 24000, loss = 473438.0

############################################################################################
# Test RMSE: 9.73076 - {'average_loss': 94.687675, 'loss': 473438.38, 'global_step': 24000}
############################################################################################

7. Prediction


In [16]:
import itertools

predict_input_fn = lambda: tfrecods_input_fn(files_name_pattern= TEST_DATA_FILES_PATTERN, 
                                      mode= tf.estimator.ModeKeys.PREDICT,
                                      batch_size= 5)

predictions = estimator.predict(input_fn=predict_input_fn)
values = list(map(lambda item: item["predictions"][0],list(itertools.islice(predictions, 5))))
print()
print("Predicted Values: {}".format(values))


* data input_fn:
================
Input file(s): data/test-*.tfrecords
Batch size: 5
Epoch Count: None
Mode: infer
Shuffle: False
================

WARNING:tensorflow:Input graph does not contain a QueueRunner. That means predict yields forever. This is probably a mistake.
INFO:tensorflow:Restoring parameters from trained_models/reg-model-06/model.ckpt-24000

Predicted Values: [50.916386, -3.9630101, 16.905411, 2.7207348, 2.8210268]

Serving via the Saved Model


In [17]:
import os

export_dir = model_dir +"/export/estimate"

saved_model_dir = export_dir + "/" + os.listdir(path=export_dir)[-1] 

print(saved_model_dir)

predictor_fn = tf.contrib.predictor.from_saved_model(
    export_dir = saved_model_dir,
    signature_def_key="predict"
)

output = predictor_fn({'csv_rows': ["0.5,1,ax01,bx02", "-0.5,-1,ax02,bx02"]})
print(output)


trained_models/reg-model-06/export/estimate/1511618142
INFO:tensorflow:Restoring parameters from b'trained_models/reg-model-06/export/estimate/1511618142/variables/variables'
{'predictions': array([[ 62.97018433],
       [-17.05881882]], dtype=float32)}

In [ ]: